Rename fields in arch_exec_domain to be more uniform.
Promote vmx_shadow_invlpg() to shadow_invlpg().
}
/*
- * Free the pages for monitor_table and guest_pl2e_cache
+ * Free the pages for monitor_table and hl2_table
*/
static void free_monitor_pagetable(struct exec_domain *ed)
{
mpl2e = (l2_pgentry_t *)
map_domain_mem(pagetable_val(ed->arch.monitor_table));
/*
- * First get the pfn for guest_pl2e_cache by looking at monitor_table
+ * First get the pfn for hl2_table by looking at monitor_table
*/
mpfn = l2_pgentry_val(mpl2e[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT])
>> PAGE_SHIFT;
/* We don't call update_pagetables() as we actively want fields such as
* the linear_pg_table to be null so that we bail out early of
* shadow_fault in case the vmx guest tries illegal accesses with
- * paging turned of.
+ * paging turned off.
*/
//update_pagetables(ed); /* this assigns shadow_pagetable */
alloc_monitor_pagetable(ed); /* this assigns monitor_pagetable */
}
}
-#ifdef CONFIG_VMX
-void vmx_shadow_invlpg(struct domain *d, unsigned long va)
+void shadow_invlpg(struct exec_domain *ed, unsigned long va)
{
- unsigned long gpte, spte, host_pfn;
+ unsigned long gpte, spte;
+
+ ASSERT(shadow_mode_enabled(ed->domain));
if (__put_user(0L, (unsigned long *)
&shadow_linear_pg_table[va >> PAGE_SHIFT])) {
- vmx_shadow_clear_state(d);
+ vmx_shadow_clear_state(ed->domain);
return;
}
return;
}
- host_pfn = phys_to_machine_mapping(gpte >> PAGE_SHIFT);
- spte = (host_pfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);
+ l1pte_propagate_from_guest(ed->domain, &gpte, &spte);
if (__put_user(spte, (unsigned long *)
&shadow_linear_pg_table[va >> PAGE_SHIFT])) {
return;
}
}
-#endif
int shadow_fault(unsigned long va, long error_code)
{
struct exec_domain *ed = current;
struct domain *d = ed->domain;
    SH_VVLOG("shadow_fault( va=%p, code=%ld )", va, error_code );
check_pagetable(d, ed->arch.guest_table, "pre-sf");
unsigned long eip;
unsigned long gpa;
int result;
- struct exec_domain *ed = current;
#if VMX_DEBUG
{
- * If vpagetable is zero, then we are still emulating 1:1 page tables,
+ * If guest_vtable is zero, then we are still emulating 1:1 page tables,
* and we should have never gotten here.
*/
- if ( !ed->arch.vpagetable )
+ if ( !current->arch.guest_vtable )
{
printk("vmx_do_page_fault while still running on 1:1 page table\n");
return 0;
* We do the safest things first, then try to update the shadow
* copying from guest
*/
- vmx_shadow_invlpg(ed->domain, va);
+ shadow_invlpg(ed, va);
index = (va >> L2_PAGETABLE_SHIFT);
- ed->arch.guest_pl2e_cache[index] =
+ ed->arch.hl2_vtable[index] =
mk_l2_pgentry(0); /* invalidate pgd cache */
}
-static inline void guest_pl2e_cache_invalidate(struct exec_domain *ed)
+static inline void hl2_table_invalidate(struct exec_domain *ed)
{
/*
* Need to optimize this
*/
- memset(ed->arch.guest_pl2e_cache, 0, PAGE_SIZE);
+ memset(ed->arch.hl2_vtable, 0, PAGE_SIZE);
}
static void vmx_io_instruction(struct xen_regs *regs,
break;
}
- guest_pl2e_cache_invalidate(d);
+ hl2_table_invalidate(d);
/*
* We make a new one if the shadow does not exist.
*/
if ((old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE)) {
vmx_shadow_clear_state(d->domain);
shadow_invalidate(d);
- guest_pl2e_cache_invalidate(d);
+ hl2_table_invalidate(d);
}
break;
default:
void vmx_do_resume(struct exec_domain *d)
{
- if ( d->arch.vpagetable )
+ if ( d->arch.guest_vtable )
__vmwrite(GUEST_CR3, pagetable_val(d->arch.shadow_table));
else
// we haven't switched off the 1:1 pagetable yet...
struct host_execution_env host_env;
struct Xgt_desc_struct desc;
struct list_head *list_ent;
- l2_pgentry_t *mpl2e, *guest_pl2e_cache;
+ l2_pgentry_t *mpl2e, *hl2_vtable;
unsigned long i, pfn = 0;
struct pfn_info *page;
execution_context_t *ec = get_execution_context();
mpl2e[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] =
mk_l2_pgentry((pfn << PAGE_SHIFT)| __PAGE_HYPERVISOR);
- guest_pl2e_cache = map_domain_mem(pfn << PAGE_SHIFT);
- memset(guest_pl2e_cache, 0, PAGE_SIZE); /* clean it up */
- ed->arch.guest_pl2e_cache = guest_pl2e_cache;
+ hl2_vtable = map_domain_mem(pfn << PAGE_SHIFT);
+ memset(hl2_vtable, 0, PAGE_SIZE); /* clean it up */
+ ed->arch.hl2_vtable = hl2_vtable;
unmap_domain_mem(mpl2e);
* The stack frame for events is exactly that of an x86 hardware interrupt.
* The stack frame for a failsafe callback is augmented with saved values
* for segment registers %ds, %es, %fs and %gs:
- * %ds, %es, %fs, %gs, %eip, %cs, %eflags [, %oldesp, %oldss]
+ * %ds, %es, %fs, %gs, %eip, %cs, %eflags [, %oldesp, %oldss]
*/
unsigned long event_selector; /* entry CS (x86/32 only) */
pagetable_t guest_table_user; /* x86/64: user-space pagetable. */
pagetable_t guest_table; /* guest notion of cr3 */
pagetable_t shadow_table; /* shadow of guest */
+ pagetable_t hl2_table; /* shortcut to guest's L1 PTEs */
pagetable_t monitor_table; /* used in hypervisor */
pagetable_t phys_table; /* guest 1:1 pagetable */
- l2_pgentry_t *vpagetable; /* virtual address of pagetable */
- l2_pgentry_t *shadow_vtable; /* virtual address of shadow_table */
- l2_pgentry_t *guest_pl2e_cache; /* guest page directory cache */
+ l2_pgentry_t *guest_vtable; /* virtual address of pagetable */
+ l2_pgentry_t *shadow_vtable; /* virtual address of shadow_table */
+ l2_pgentry_t *hl2_vtable; /* virtual address of hl2_table */
+ l2_pgentry_t *monitor_vtable; /* virtual address of monitor_table */
/* Virtual CR2 value. Can be read/written by guest. */
unsigned long guest_cr2;
extern void unshadow_table(unsigned long gpfn, unsigned int type);
extern int shadow_mode_enable(struct domain *p, unsigned int mode);
extern void free_shadow_state(struct domain *d);
+extern void shadow_invlpg(struct exec_domain *, unsigned long);
#ifdef CONFIG_VMX
extern void vmx_shadow_clear_state(struct domain *);
-extern void vmx_shadow_invlpg(struct domain *, unsigned long);
#endif
#define __mfn_to_gpfn(_d, mfn) \
*sl2e = l2_pgentry_val(
shadow_linear_l2_table[l2_table_offset(va)]);
}
- else {
- BUG(); /* why do we need this case? */
- *sl2e = l2_pgentry_val(linear_l2_table[l2_table_offset(va)]);
- }
+ else
+ BUG();
}
static inline void __shadow_set_l2e(
shadow_linear_l2_table[l2_table_offset(va)] = mk_l2_pgentry(value);
}
else
- {
- BUG(); /* why do we need this case? */
- linear_l2_table[l2_table_offset(va)] = mk_l2_pgentry(value);
- }
+ BUG();
}
static inline void __guest_get_l2e(
struct exec_domain *ed, unsigned long va, unsigned long *l2e)
{
*l2e = ( shadow_mode_translate(ed->domain) ) ?
- l2_pgentry_val(ed->arch.vpagetable[l2_table_offset(va)]) :
+ l2_pgentry_val(ed->arch.guest_vtable[l2_table_offset(va)]) :
l2_pgentry_val(linear_l2_table[l2_table_offset(va)]);
}
unsigned long pfn;
pfn = phys_to_machine_mapping(value >> PAGE_SHIFT);
- ed->arch.guest_pl2e_cache[l2_table_offset(va)] =
+ ed->arch.hl2_vtable[l2_table_offset(va)] =
mk_l2_pgentry((pfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
- ed->arch.vpagetable[l2_table_offset(va)] = mk_l2_pgentry(value);
+ ed->arch.guest_vtable[l2_table_offset(va)] = mk_l2_pgentry(value);
}
else
{
/* unmap the old mappings */
if ( ed->arch.shadow_vtable )
unmap_domain_mem(ed->arch.shadow_vtable);
- if ( ed->arch.vpagetable )
- unmap_domain_mem(ed->arch.vpagetable);
+ if ( ed->arch.guest_vtable )
+ unmap_domain_mem(ed->arch.guest_vtable);
/* new mapping */
mpl2e = (l2_pgentry_t *)
map_domain_mem(pagetable_val(ed->arch.monitor_table));
+ // mafetter: why do we need to keep setting up shadow_linear_pg_table for
+ // this monitor page table? Seems unnecessary...
+ //
mpl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)] =
mk_l2_pgentry((smfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
__flush_tlb_one(SH_LINEAR_PT_VIRT_START);
memset(spl2e, 0, L2_PAGETABLE_ENTRIES * sizeof(l2_pgentry_t));
ed->arch.shadow_vtable = spl2e;
- ed->arch.vpagetable = gpl2e; /* expect the guest did clean this up */
+ ed->arch.guest_vtable = gpl2e; /* expect the guest did clean this up */
unmap_domain_mem(mpl2e);
}
index = (gva >> L2_PAGETABLE_SHIFT);
- if (!l2_pgentry_val(ed->arch.guest_pl2e_cache[index])) {
+ if (!l2_pgentry_val(ed->arch.hl2_vtable[index])) {
pfn = phys_to_machine_mapping(gpde >> PAGE_SHIFT);
- ed->arch.guest_pl2e_cache[index] =
+ ed->arch.hl2_vtable[index] =
mk_l2_pgentry((pfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
}
ed->arch.shadow_table = mk_pagetable(smfn<<PAGE_SHIFT);
if ( !shadow_mode_external(ed->domain) )
+ // mafetter: why do we need to keep overwriting
+ // ed->arch.monitor_table? Seems unnecessary...
+ //
ed->arch.monitor_table = ed->arch.shadow_table;
}
}
#ifdef __x86_64__
else if ( !(ed->arch.flags & TF_kernel_mode) )
+ // mafetter: why do we need to keep overwriting
+ // ed->arch.monitor_table? Seems unnecessary...
+ //
ed->arch.monitor_table = ed->arch.guest_table_user;
#endif
else
+ // mafetter: why do we need to keep overwriting
+ // ed->arch.monitor_table? Seems unnecessary...
+ //
ed->arch.monitor_table = ed->arch.guest_table;
}